bitkeeper revision 1.1236.1.151 (424967aaR020l7m4GP2JkhW7oOgK9g)
author: kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>
Tue, 29 Mar 2005 14:35:22 +0000 (14:35 +0000)
committer: kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>
Tue, 29 Mar 2005 14:35:22 +0000 (14:35 +0000)
Remove per-cpu batch queues for mmu updates and multicalls. Instead
batching is done locally within functions that most benefit.
Signed-off-by: Keir Fraser <keir@xensource.com>
25 files changed:
.rootkeys
linux-2.4.29-xen-sparse/arch/xen/kernel/ldt.c
linux-2.4.29-xen-sparse/arch/xen/kernel/process.c
linux-2.4.29-xen-sparse/arch/xen/kernel/setup.c
linux-2.4.29-xen-sparse/arch/xen/mm/fault.c
linux-2.4.29-xen-sparse/include/asm-xen/desc.h
linux-2.4.29-xen-sparse/include/asm-xen/mmu_context.h
linux-2.4.29-xen-sparse/mkbuildtree
linux-2.6.11-xen-sparse/arch/xen/i386/kernel/cpu/common.c
linux-2.6.11-xen-sparse/arch/xen/i386/kernel/ldt.c
linux-2.6.11-xen-sparse/arch/xen/i386/kernel/pci-dma.c
linux-2.6.11-xen-sparse/arch/xen/i386/kernel/process.c
linux-2.6.11-xen-sparse/arch/xen/i386/kernel/setup.c
linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smpboot.c
linux-2.6.11-xen-sparse/arch/xen/i386/kernel/traps.c
linux-2.6.11-xen-sparse/arch/xen/i386/mm/hypervisor.c
linux-2.6.11-xen-sparse/arch/xen/i386/mm/init.c
linux-2.6.11-xen-sparse/drivers/xen/balloon/balloon.c
linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/desc.h
linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/mmu_context.h
linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/pgalloc.h
linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/pgtable.h
linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/processor.h
linux-2.6.11-xen-sparse/include/asm-xen/hypervisor.h
linux-2.6.11-xen-sparse/include/asm-xen/multicall.h [deleted file]

index fac49380c58559ece84cd990fc564275d33989f1..d69f91c7b64576e73747d0b1fbdea36e1db7f9bd 100644 (file)
--- a/.rootkeys
+++ b/.rootkeys
 40f5623aGPlsm0u1LTO-NVZ6AGzNRQ linux-2.6.11-xen-sparse/include/asm-xen/hypervisor.h
 3f108af1ylCIm82H052FVTfXACBHrw linux-2.6.11-xen-sparse/include/asm-xen/linux-public/privcmd.h
 3fa8e3f0kBLeE4To2vpdi3cpJbIkbQ linux-2.6.11-xen-sparse/include/asm-xen/linux-public/suspend.h
-40f5623cndVUFlkxpf7Lfx7xu8madQ linux-2.6.11-xen-sparse/include/asm-xen/multicall.h
 4122466356eIBnC9ot44WSVVIFyhQA linux-2.6.11-xen-sparse/include/asm-xen/queues.h
 3f689063BoW-HWV3auUJ-OqXfcGArw linux-2.6.11-xen-sparse/include/asm-xen/xen_proc.h
 419b4e93z2S0gR17XTy8wg09JEwAhg linux-2.6.11-xen-sparse/include/linux/gfp.h
index 61fc1eb8247e9bd16a5123f3e1275494f16d6ccc..79ac73960d46a2a58bfe33efd2b51b77bdb101d9 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/vmalloc.h>
 #include <linux/slab.h>
 
+#include <asm/mmu_context.h>
 #include <asm/uaccess.h>
 #include <asm/system.h>
 #include <asm/ldt.h>
@@ -58,7 +59,6 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
                        pc->ldt,
                        (pc->size*LDT_ENTRY_SIZE)/PAGE_SIZE);
                load_LDT(pc);
-               flush_page_update_queue();
 #ifdef CONFIG_SMP
                if (current->mm->cpu_vm_mask != (1<<smp_processor_id()))
                        smp_call_function(flush_ldt, 0, 1, 1);
@@ -66,6 +66,8 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
        }
        wmb();
        if (oldsize) {
+               make_pages_writable(
+                       oldldt, (oldsize*LDT_ENTRY_SIZE)/PAGE_SIZE);
                if (oldsize*LDT_ENTRY_SIZE > PAGE_SIZE)
                        vfree(oldldt);
                else
@@ -84,7 +86,6 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
        }
        memcpy(new->ldt, old->ldt, old->size*LDT_ENTRY_SIZE);
        make_pages_readonly(new->ldt, (new->size*LDT_ENTRY_SIZE)/PAGE_SIZE);
-       flush_page_update_queue();
        return 0;
 }
 
@@ -116,10 +117,11 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 void destroy_context(struct mm_struct *mm)
 {
        if (mm->context.size) {
+               if (mm_state_sync & STATE_SYNC_LDT)
+                       clear_LDT();
                make_pages_writable(
                        mm->context.ldt, 
                        (mm->context.size*LDT_ENTRY_SIZE)/PAGE_SIZE);
-               flush_page_update_queue();
                if (mm->context.size*LDT_ENTRY_SIZE > PAGE_SIZE)
                        vfree(mm->context.ldt);
                else
index bc97ceeac7d1425f89ea19b19e52e53387900fea..478184f504985d592cdbf70e38c0ddaad842af18 100644 (file)
@@ -43,7 +43,6 @@
 #include <asm/i387.h>
 #include <asm/desc.h>
 #include <asm/mmu_context.h>
-#include <asm/multicall.h>
 #include <asm-xen/xen-public/physdev.h>
 
 #include <linux/irq.h>
@@ -305,19 +304,36 @@ void fastcall __switch_to(struct task_struct *prev_p, struct task_struct *next_p
 {
     struct thread_struct *next = &next_p->thread;
     physdev_op_t op;
+    multicall_entry_t _mcl[8], *mcl = _mcl;
+    mmu_update_t _mmu[2], *mmu = _mmu;
 
-    __cli();
+    if ( mm_state_sync & STATE_SYNC_PT )
+    {
+        mmu->ptr = virt_to_machine(cur_pgd) | MMU_EXTENDED_COMMAND;
+        mmu->val = MMUEXT_NEW_BASEPTR;
+        mmu++;
+    }
 
-    /*
-     * We clobber FS and GS here so that we avoid a GPF when restoring previous
-     * task's FS/GS values in Xen when the LDT is switched. If we don't do this
-     * then we can end up erroneously re-flushing the page-update queue when
-     * we 'execute_multicall_list'.
-     */
-    __asm__ __volatile__ ( 
-        "xorl %%eax,%%eax; movl %%eax,%%fs; movl %%eax,%%gs" : : : "eax" );
+    if ( mm_state_sync & STATE_SYNC_LDT )
+    {
+        __asm__ __volatile__ ( 
+            "xorl %%eax,%%eax; movl %%eax,%%fs; movl %%eax,%%gs" : : : "eax" );
+        mmu->ptr = (unsigned long)next_p->mm->context.ldt |
+            MMU_EXTENDED_COMMAND;
+        mmu->val = (next_p->mm->context.size << MMUEXT_CMD_SHIFT) |
+            MMUEXT_SET_LDT;
+        mmu++;
+    }
 
-    MULTICALL_flush_page_update_queue();
+    if ( mm_state_sync != 0 )
+    {
+        mcl->op      = __HYPERVISOR_mmu_update;
+        mcl->args[0] = (unsigned long)_mmu;
+        mcl->args[1] = mmu - _mmu;
+        mcl->args[2] = 0;
+        mcl++;
+        mm_state_sync = 0;
+    }
 
     /*
      * This is basically 'unlazy_fpu', except that we queue a multicall to 
@@ -332,21 +348,26 @@ void fastcall __switch_to(struct task_struct *prev_p, struct task_struct *next_p
             asm volatile( "fnsave %0 ; fwait"
                           : "=m" (prev_p->thread.i387.fsave) );
        prev_p->flags &= ~PF_USEDFPU;
-        queue_multicall1(__HYPERVISOR_fpu_taskswitch, 1);
+        mcl->op      = __HYPERVISOR_fpu_taskswitch;
+        mcl->args[0] = 1;
+        mcl++;
     }
 
-    queue_multicall2(__HYPERVISOR_stack_switch, __KERNEL_DS, next->esp0);
+    mcl->op      = __HYPERVISOR_stack_switch;
+    mcl->args[0] = __KERNEL_DS;
+    mcl->args[1] = next->esp0;
+    mcl++;
 
     if ( prev_p->thread.io_pl != next->io_pl ) 
     {
         op.cmd             = PHYSDEVOP_SET_IOPL;
        op.u.set_iopl.iopl = next->io_pl;
-        queue_multicall1(__HYPERVISOR_physdev_op, (unsigned long)&op);
+        mcl->op      = __HYPERVISOR_physdev_op;
+        mcl->args[0] = (unsigned long)&op;
+        mcl++;
     }
 
-    /* EXECUTE ALL TASK SWITCH XEN SYSCALLS AT THIS POINT. */
-    execute_multicall_list();
-    __sti();
+    (void)HYPERVISOR_multicall(_mcl, mcl - _mcl);
 
     /*
      * Restore %fs and %gs.
index 63317263ee42a722b5c62484c47410cbe775bbb4..5d3529a80ae2bcf3a0fcdb65f0965a3f6d5a2770 100644 (file)
@@ -62,9 +62,6 @@ shared_info_t *HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
 
 unsigned int *phys_to_machine_mapping, *pfn_to_mfn_frame_list;
 
-DEFINE_PER_CPU(multicall_entry_t, multicall_list[8]);
-DEFINE_PER_CPU(int, nr_multicall_ents);
-
 /*
  * Machine setup..
  */
@@ -1206,7 +1203,6 @@ void __init cpu_init (void)
     HYPERVISOR_stack_switch(__KERNEL_DS, current->thread.esp0);
 
     load_LDT(&init_mm.context);
-    flush_page_update_queue();
 
     /* Force FPU initialization. */
     current->flags &= ~PF_USEDFPU;
index 7db6463e0924444a0563fde38a8844d47003f106..1fd1b4c1494066266150f77dea8ae9a3c8ec4cf3 100644 (file)
@@ -28,6 +28,7 @@
 extern void die(const char *,struct pt_regs *,long);
 
 pgd_t *cur_pgd;
+int mm_state_sync;
 
 extern spinlock_t timerlist_lock;
 
index 7eaacabf2289e6339a6965ef1b75f2d7886082b2..b59b998d951909b0cdd60bce2bbf3667c60f68b2 100644 (file)
@@ -16,6 +16,11 @@ struct Xgt_desc_struct {
 
 extern struct desc_struct default_ldt[];
 
+static inline void clear_LDT(void)
+{
+    xen_set_ldt(0, 0);
+}
+
 static inline void load_LDT(mm_context_t *pc)
 {
     void *segments = pc->ldt;
index 7972ce7d74b3fd8311467b4118aee6d11293ad7c..60c245e408c5f5fac0649fcd3673edd88b6a24ac 100644 (file)
@@ -28,47 +28,34 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk,
 #endif
 
 extern pgd_t *cur_pgd;
+extern int mm_state_sync;
+#define STATE_SYNC_PT  1
+#define STATE_SYNC_LDT 2
 
 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk, unsigned cpu)
 {
        if (prev != next) {
                /* stop flush ipis for the previous mm */
                clear_bit(cpu, &prev->cpu_vm_mask);
-#ifdef CONFIG_SMP
-               cpu_tlbstate[cpu].state = TLBSTATE_OK;
-               cpu_tlbstate[cpu].active_mm = next;
-#endif
-
                /* Re-load page tables */
                cur_pgd = next->pgd;
-               queue_pt_switch(__pa(cur_pgd));
-                /* load_LDT, if either the previous or next thread
-                 * has a non-default LDT.
-                 */
-                if (next->context.size+prev->context.size)
-                        load_LDT(&next->context);
+               mm_state_sync |= STATE_SYNC_PT;
+               /* load_LDT, if either the previous or next thread
+                * has a non-default LDT.
+                */
+               if (next->context.size+prev->context.size)
+                       mm_state_sync |= STATE_SYNC_LDT;
        }
-#ifdef CONFIG_SMP
-       else {
-               cpu_tlbstate[cpu].state = TLBSTATE_OK;
-               if(cpu_tlbstate[cpu].active_mm != next)
-                       out_of_line_bug();
-               if(!test_and_set_bit(cpu, &next->cpu_vm_mask)) {
-                       /* We were in lazy tlb mode and leave_mm disabled 
-                        * tlb flush IPI delivery. We must reload %cr3.
-                        */
-                       cur_pgd = next->pgd;
-                       queue_pt_switch(__pa(cur_pgd));
-                       load_LDT(next);
-               }
-       }
-#endif
 }
 
-#define activate_mm(prev, next) \
-do { \
-       switch_mm((prev),(next),NULL,smp_processor_id()); \
-       flush_page_update_queue(); \
+#define activate_mm(prev, next)                                 \
+do {                                                            \
+       switch_mm((prev),(next),NULL,smp_processor_id());       \
+       if (mm_state_sync & STATE_SYNC_PT)                      \
+               xen_pt_switch(__pa(cur_pgd));                   \
+       if (mm_state_sync & STATE_SYNC_LDT)                     \
+               load_LDT(&(next)->context);                     \
+       mm_state_sync = 0;                                      \
 } while ( 0 )
 
 #endif
index 15707041d069baf0617bd5663df96378a7b0c97e..f086298a69db6aa4ed27dddb47b485df6d2aafd5 100755 (executable)
@@ -210,7 +210,6 @@ ln -sf ../../${LINUX_26}/include/asm-xen/ctrl_if.h
 ln -sf ../../${LINUX_26}/include/asm-xen/evtchn.h
 ln -sf ../../${LINUX_26}/include/asm-xen/gnttab.h
 ln -sf ../../${LINUX_26}/include/asm-xen/hypervisor.h
-ln -sf ../../${LINUX_26}/include/asm-xen/multicall.h
 ln -sf ../../${LINUX_26}/include/asm-xen/xen_proc.h
 ln -sf ../../${LINUX_26}/include/asm-xen/asm-i386/synch_bitops.h
 
index 2d2b7afc71918205b7bfc5c47a3dfc8b423002fa..e63fa420a0cc70bfdb5aabe9db37a71074588b0a 100644 (file)
@@ -564,7 +564,6 @@ void __init cpu_gdt_init(struct Xgt_desc_struct *gdt_descr)
                frames[f] = virt_to_machine(va) >> PAGE_SHIFT;
                make_page_readonly((void *)va);
        }
-       flush_page_update_queue();
        if (HYPERVISOR_set_gdt(frames, gdt_descr->size / 8))
                BUG();
        lgdt_finish();
@@ -622,7 +621,6 @@ void __init cpu_init (void)
        load_esp0(t, thread);
 
        load_LDT(&init_mm.context);
-       flush_page_update_queue();
 
        /* Clear %fs and %gs. */
        asm volatile ("xorl %eax, %eax; movl %eax, %fs; movl %eax, %gs");
index 53f031503935dd7fca22ccb417b189d8e8714cf4..a8380fb85b62c78cdf8d26063b10e33254cfe4a6 100644 (file)
 #ifdef CONFIG_SMP /* avoids "defined but not used" warnig */
 static void flush_ldt(void *null)
 {
-       if (current->active_mm) {
+       if (current->active_mm)
                load_LDT(&current->active_mm->context);
-               flush_page_update_queue();
-       }
 }
 #endif
 
@@ -64,7 +62,6 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
                make_pages_readonly(pc->ldt, (pc->size * LDT_ENTRY_SIZE) /
                                    PAGE_SIZE);
                load_LDT(pc);
-               flush_page_update_queue();
 #ifdef CONFIG_SMP
                mask = cpumask_of_cpu(smp_processor_id());
                if (!cpus_equal(current->mm->cpu_vm_mask, mask))
@@ -75,7 +72,6 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
        if (oldsize) {
                make_pages_writable(oldldt, (oldsize * LDT_ENTRY_SIZE) /
                        PAGE_SIZE);
-               flush_page_update_queue();
                if (oldsize*LDT_ENTRY_SIZE > PAGE_SIZE)
                        vfree(oldldt);
                else
@@ -92,7 +88,6 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
        memcpy(new->ldt, old->ldt, old->size*LDT_ENTRY_SIZE);
        make_pages_readonly(new->ldt, (new->size * LDT_ENTRY_SIZE) /
                            PAGE_SIZE);
-       flush_page_update_queue();
        return 0;
 }
 
@@ -127,7 +122,6 @@ void destroy_context(struct mm_struct *mm)
                make_pages_writable(mm->context.ldt, 
                                    (mm->context.size * LDT_ENTRY_SIZE) /
                                    PAGE_SIZE);
-               flush_page_update_queue();
                if (mm->context.size*LDT_ENTRY_SIZE > PAGE_SIZE)
                        vfree(mm->context.ldt);
                else
index 8cb2c649594c518ae822a4d47e99a818c2aa1e13..1d8f7814541a54f8e6db868b2403f6ddb3b3abee 100644 (file)
@@ -54,10 +54,10 @@ xen_contig_memory(unsigned long vstart, unsigned int order)
                pmd = pmd_offset(pud, (vstart + (i*PAGE_SIZE)));
                pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
                pfn = pte->pte_low >> PAGE_SHIFT;
-               queue_l1_entry_update(pte, 0);
+               HYPERVISOR_update_va_mapping(
+                       vstart + (i*PAGE_SIZE), __pte_ma(0), 0);
                phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] =
                        INVALID_P2M_ENTRY;
-               flush_page_update_queue();
                if (HYPERVISOR_dom_mem_op(MEMOP_decrease_reservation, 
                                          &pfn, 1, 0) != 1) BUG();
        }
@@ -70,14 +70,14 @@ xen_contig_memory(unsigned long vstart, unsigned int order)
                pud = pud_offset(pgd, (vstart + (i*PAGE_SIZE)));
                pmd = pmd_offset(pud, (vstart + (i*PAGE_SIZE)));
                pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
-               queue_l1_entry_update(
-                       pte, ((pfn+i)<<PAGE_SHIFT)|__PAGE_KERNEL);
-               queue_machphys_update(
+               HYPERVISOR_update_va_mapping(
+                       vstart + (i*PAGE_SIZE),
+                       __pte_ma(((pfn+i)<<PAGE_SHIFT)|__PAGE_KERNEL), 0);
+               xen_machphys_update(
                        pfn+i, (__pa(vstart)>>PAGE_SHIFT)+i);
                phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] =
                        pfn+i;
        }
-       /* Flush updates through and flush the TLB. */
        xen_tlb_flush();
 
         balloon_unlock(flags);
index fdfc1f6207217bb9bd9893be218af664f14a1819..e8f169fbca76ff6b807ba8e37b90c8f192b2f2c2 100644 (file)
@@ -46,7 +46,6 @@
 #include <asm/i387.h>
 #include <asm/irq.h>
 #include <asm/desc.h>
-#include <asm-xen/multicall.h>
 #include <asm-xen/xen-public/physdev.h>
 #ifdef CONFIG_MATH_EMULATION
 #include <asm/math_emu.h>
@@ -444,9 +443,7 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
        int cpu = smp_processor_id();
        struct tss_struct *tss = &per_cpu(init_tss, cpu);
        physdev_op_t iopl_op, iobmp_op;
-
-        /* NB. No need to disable interrupts as already done in sched.c */
-        /* __cli(); */
+       multicall_entry_t _mcl[8], *mcl = _mcl;
 
        /*
         * Save away %fs and %gs. No need to save %es and %ds, as
@@ -463,8 +460,6 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
                "xorl %%eax,%%eax; movl %%eax,%%fs; movl %%eax,%%gs" : : :
                "eax" );
 
-       MULTICALL_flush_page_update_queue();
-
        /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
 
        /*
@@ -474,7 +469,9 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
         */
        if (prev_p->thread_info->status & TS_USEDFPU) {
                __save_init_fpu(prev_p); /* _not_ save_init_fpu() */
-               queue_multicall1(__HYPERVISOR_fpu_taskswitch, 1);
+               mcl->op      = __HYPERVISOR_fpu_taskswitch;
+               mcl->args[0] = 1;
+               mcl++;
        }
 
        /*
@@ -482,20 +479,25 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
         * This is load_esp0(tss, next) with a multicall.
         */
        tss->esp0 = next->esp0;
-       queue_multicall2(__HYPERVISOR_stack_switch, tss->ss0, tss->esp0);
+       mcl->op      = __HYPERVISOR_stack_switch;
+       mcl->args[0] = tss->ss0;
+       mcl->args[1] = tss->esp0;
+       mcl++;
 
        /*
         * Load the per-thread Thread-Local Storage descriptor.
         * This is load_TLS(next, cpu) with multicalls.
         */
-#define C(i) do {                                                          \
-       if (unlikely(next->tls_array[i].a != prev->tls_array[i].a ||        \
-                    next->tls_array[i].b != prev->tls_array[i].b))         \
-               queue_multicall3(__HYPERVISOR_update_descriptor,            \
-                                virt_to_machine(&get_cpu_gdt_table(cpu)    \
-                                                [GDT_ENTRY_TLS_MIN + i]),  \
-                                ((u32 *)&next->tls_array[i])[0],           \
-                                ((u32 *)&next->tls_array[i])[1]);          \
+#define C(i) do {                                                       \
+       if (unlikely(next->tls_array[i].a != prev->tls_array[i].a ||    \
+                    next->tls_array[i].b != prev->tls_array[i].b)) {   \
+               mcl->op      = __HYPERVISOR_update_descriptor;          \
+               mcl->args[0] = virt_to_machine(&get_cpu_gdt_table(cpu)  \
+                                        [GDT_ENTRY_TLS_MIN + i]);      \
+               mcl->args[1] = ((u32 *)&next->tls_array[i])[0];         \
+               mcl->args[2] = ((u32 *)&next->tls_array[i])[1];         \
+               mcl++;                                                  \
+       }                                                               \
 } while (0)
        C(0); C(1); C(2);
 #undef C
@@ -503,8 +505,9 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
        if (unlikely(prev->io_pl != next->io_pl)) {
                iopl_op.cmd             = PHYSDEVOP_SET_IOPL;
                iopl_op.u.set_iopl.iopl = next->io_pl;
-               queue_multicall1(__HYPERVISOR_physdev_op,
-                               (unsigned long)&iopl_op);
+               mcl->op      = __HYPERVISOR_physdev_op;
+               mcl->args[0] = (unsigned long)&iopl_op;
+               mcl++;
        }
 
        if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr)) {
@@ -514,13 +517,12 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
                        (unsigned long)next->io_bitmap_ptr;
                iobmp_op.u.set_iobitmap.nr_ports =
                        next->io_bitmap_ptr ? IO_BITMAP_BITS : 0;
-               queue_multicall1(__HYPERVISOR_physdev_op,
-                               (unsigned long)&iobmp_op);
+               mcl->op      = __HYPERVISOR_physdev_op;
+               mcl->args[0] = (unsigned long)&iobmp_op;
+               mcl++;
        }
 
-       /* EXECUTE ALL TASK SWITCH XEN SYSCALLS AT THIS POINT. */
-       execute_multicall_list();
-        /* __sti(); */
+       (void)HYPERVISOR_multicall(_mcl, mcl - _mcl);
 
        /*
         * Restore %fs and %gs if needed.
index 7715a35b6804cb44d242c5f16e346491f2731c4b..8cd00c343ef8992b2ee48b4a5144f0bad03e92e3 100644 (file)
@@ -362,9 +362,6 @@ EXPORT_SYMBOL(HYPERVISOR_shared_info);
 unsigned int *phys_to_machine_mapping, *pfn_to_mfn_frame_list;
 EXPORT_SYMBOL(phys_to_machine_mapping);
 
-DEFINE_PER_CPU(multicall_entry_t, multicall_list[8]);
-DEFINE_PER_CPU(int, nr_multicall_ents);
-
 /* Raw start-of-day parameters from the hypervisor. */
 union xen_start_info_union xen_start_info_union;
 
index 9f39dde976e893752b2fd04aece90a83328d98f1..0f271d80ad2fff3a692dba30f8af63b4da00e47e 100644 (file)
@@ -898,7 +898,6 @@ static int __init do_boot_cpu(int apicid)
                        make_page_readonly((void *)va);
                }
                ctxt.gdt_ents = cpu_gdt_descr[cpu].size / 8;
-               flush_page_update_queue();
        }
 
        /* Ring 1 stack is the initial stack. */
index b7c29174fc37671222bd4fe27f850a366baac076..f56957f6e67ed1608c6eeba685692826c78218f3 100644 (file)
@@ -963,7 +963,6 @@ void __init trap_init(void)
         * and a callgate to lcall27 for Solaris/x86 binaries
         */
        make_lowmem_page_readonly(&default_ldt[0]);
-       flush_page_update_queue();
 
        /*
         * Should be a barrier for any external CPU state.
index f9d8e089e06903f9339688b0292d4c6d8409a835..525576243bcd10ae65d2a6583dfb05cd3ccdab47 100644 (file)
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm-xen/hypervisor.h>
-#include <asm-xen/multicall.h>
 #include <asm-xen/balloon.h>
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
 #include <linux/percpu.h>
 #endif
 
-/*
- * This suffices to protect us if we ever move to SMP domains.
- * Further, it protects us against interrupts. At the very least, this is
- * required for the network driver which flushes the update queue before
- * pushing new receive buffers.
- */
-static spinlock_t update_lock = SPIN_LOCK_UNLOCKED;
-
-#define QUEUE_SIZE 128
 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
 #define pte_offset_kernel pte_offset
 #define pud_t pgd_t
@@ -57,366 +47,92 @@ static spinlock_t update_lock = SPIN_LOCK_UNLOCKED;
 #define pmd_val_ma(v) (v).pud.pgd.pgd;
 #endif
 
-DEFINE_PER_CPU(mmu_update_t, update_queue[QUEUE_SIZE]);
-DEFINE_PER_CPU(unsigned int, mmu_update_queue_idx);
-
-/*
- * MULTICALL_flush_page_update_queue:
- *   This is a version of the flush which queues as part of a multicall.
- */
-void MULTICALL_flush_page_update_queue(void)
-{
-    int cpu = smp_processor_id();
-    int idx;
-    unsigned long flags;
-    unsigned int _idx;
-    spin_lock_irqsave(&update_lock, flags);
-    idx = per_cpu(mmu_update_queue_idx, cpu);
-    if ( (_idx = idx) != 0 ) 
-    {
-        per_cpu(mmu_update_queue_idx, cpu) = 0;
-        wmb(); /* Make sure index is cleared first to avoid double updates. */
-        queue_multicall3(__HYPERVISOR_mmu_update, 
-                         (unsigned long)&per_cpu(update_queue[0], cpu), 
-                         (unsigned long)_idx, 
-                         (unsigned long)NULL);
-    }
-    spin_unlock_irqrestore(&update_lock, flags);
-}
-
-static inline void __flush_page_update_queue(void)
-{
-    int cpu = smp_processor_id();
-    unsigned int _idx = per_cpu(mmu_update_queue_idx, cpu);
-    per_cpu(mmu_update_queue_idx, cpu) = 0;
-    wmb(); /* Make sure index is cleared first to avoid double updates. */
-    if ( unlikely(HYPERVISOR_mmu_update(&per_cpu(update_queue[0], cpu), _idx, NULL) < 0) )
-    {
-        printk(KERN_ALERT "Failed to execute MMU updates.\n");
-        BUG();
-    }
-}
-
-void _flush_page_update_queue(void)
-{
-    int cpu = smp_processor_id();
-    unsigned long flags;
-    spin_lock_irqsave(&update_lock, flags);
-    if ( per_cpu(mmu_update_queue_idx, cpu) != 0 ) __flush_page_update_queue();
-    spin_unlock_irqrestore(&update_lock, flags);
-}
-
-static inline void increment_index(void)
-{
-    int cpu = smp_processor_id();
-    per_cpu(mmu_update_queue_idx, cpu)++;
-    if ( unlikely(per_cpu(mmu_update_queue_idx, cpu) == QUEUE_SIZE) ) __flush_page_update_queue();
-}
-
-static inline void increment_index_and_flush(void)
-{
-    int cpu = smp_processor_id();
-    per_cpu(mmu_update_queue_idx, cpu)++;
-    __flush_page_update_queue();
-}
-
-void queue_l1_entry_update(pte_t *ptr, unsigned long val)
-{
-    int cpu = smp_processor_id();
-    int idx;
-    unsigned long flags;
-    spin_lock_irqsave(&update_lock, flags);
-    idx = per_cpu(mmu_update_queue_idx, cpu);
-    per_cpu(update_queue[idx], cpu).ptr = virt_to_machine(ptr);
-    per_cpu(update_queue[idx], cpu).val = val;
-    increment_index();
-    spin_unlock_irqrestore(&update_lock, flags);
-}
-
-void queue_l2_entry_update(pmd_t *ptr, pmd_t val)
-{
-    int cpu = smp_processor_id();
-    int idx;
-    unsigned long flags;
-    spin_lock_irqsave(&update_lock, flags);
-    idx = per_cpu(mmu_update_queue_idx, cpu);
-    per_cpu(update_queue[idx], cpu).ptr = virt_to_machine(ptr);
-    per_cpu(update_queue[idx], cpu).val = pmd_val_ma(val);
-    increment_index();
-    spin_unlock_irqrestore(&update_lock, flags);
-}
-
-void queue_pt_switch(unsigned long ptr)
-{
-    int cpu = smp_processor_id();
-    int idx;
-    unsigned long flags;
-    spin_lock_irqsave(&update_lock, flags);
-    idx = per_cpu(mmu_update_queue_idx, cpu);
-    per_cpu(update_queue[idx], cpu).ptr  = phys_to_machine(ptr);
-    per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
-    per_cpu(update_queue[idx], cpu).val  = MMUEXT_NEW_BASEPTR;
-    increment_index();
-    spin_unlock_irqrestore(&update_lock, flags);
-}
-
-void queue_tlb_flush(void)
-{
-    int cpu = smp_processor_id();
-    int idx;
-    unsigned long flags;
-    spin_lock_irqsave(&update_lock, flags);
-    idx = per_cpu(mmu_update_queue_idx, cpu);
-    per_cpu(update_queue[idx], cpu).ptr  = MMU_EXTENDED_COMMAND;
-    per_cpu(update_queue[idx], cpu).val  = MMUEXT_TLB_FLUSH;
-    increment_index();
-    spin_unlock_irqrestore(&update_lock, flags);
-}
-
-void queue_invlpg(unsigned long ptr)
-{
-    int cpu = smp_processor_id();
-    int idx;
-    unsigned long flags;
-    spin_lock_irqsave(&update_lock, flags);
-    idx = per_cpu(mmu_update_queue_idx, cpu);
-    per_cpu(update_queue[idx], cpu).ptr  = MMU_EXTENDED_COMMAND;
-    per_cpu(update_queue[idx], cpu).ptr |= ptr & PAGE_MASK;
-    per_cpu(update_queue[idx], cpu).val  = MMUEXT_INVLPG;
-    increment_index();
-    spin_unlock_irqrestore(&update_lock, flags);
-}
-
-void queue_pgd_pin(unsigned long ptr)
-{
-    int cpu = smp_processor_id();
-    int idx;
-    unsigned long flags;
-    spin_lock_irqsave(&update_lock, flags);
-    idx = per_cpu(mmu_update_queue_idx, cpu);
-    per_cpu(update_queue[idx], cpu).ptr  = phys_to_machine(ptr);
-    per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
-    per_cpu(update_queue[idx], cpu).val  = MMUEXT_PIN_L2_TABLE;
-    increment_index();
-    spin_unlock_irqrestore(&update_lock, flags);
-}
-
-void queue_pgd_unpin(unsigned long ptr)
-{
-    int cpu = smp_processor_id();
-    int idx;
-    unsigned long flags;
-    spin_lock_irqsave(&update_lock, flags);
-    idx = per_cpu(mmu_update_queue_idx, cpu);
-    per_cpu(update_queue[idx], cpu).ptr  = phys_to_machine(ptr);
-    per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
-    per_cpu(update_queue[idx], cpu).val  = MMUEXT_UNPIN_TABLE;
-    increment_index();
-    spin_unlock_irqrestore(&update_lock, flags);
-}
-
-void queue_pte_pin(unsigned long ptr)
-{
-    int cpu = smp_processor_id();
-    int idx;
-    unsigned long flags;
-    spin_lock_irqsave(&update_lock, flags);
-    idx = per_cpu(mmu_update_queue_idx, cpu);
-    per_cpu(update_queue[idx], cpu).ptr  = phys_to_machine(ptr);
-    per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
-    per_cpu(update_queue[idx], cpu).val  = MMUEXT_PIN_L1_TABLE;
-    increment_index();
-    spin_unlock_irqrestore(&update_lock, flags);
-}
-
-void queue_pte_unpin(unsigned long ptr)
-{
-    int cpu = smp_processor_id();
-    int idx;
-    unsigned long flags;
-    spin_lock_irqsave(&update_lock, flags);
-    idx = per_cpu(mmu_update_queue_idx, cpu);
-    per_cpu(update_queue[idx], cpu).ptr  = phys_to_machine(ptr);
-    per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
-    per_cpu(update_queue[idx], cpu).val  = MMUEXT_UNPIN_TABLE;
-    increment_index();
-    spin_unlock_irqrestore(&update_lock, flags);
-}
-
-void queue_set_ldt(unsigned long ptr, unsigned long len)
-{
-    int cpu = smp_processor_id();
-    int idx;
-    unsigned long flags;
-    spin_lock_irqsave(&update_lock, flags);
-    idx = per_cpu(mmu_update_queue_idx, cpu);
-    per_cpu(update_queue[idx], cpu).ptr  = MMU_EXTENDED_COMMAND | ptr;
-    per_cpu(update_queue[idx], cpu).val  = MMUEXT_SET_LDT | (len << MMUEXT_CMD_SHIFT);
-    increment_index();
-    spin_unlock_irqrestore(&update_lock, flags);
-}
-
-void queue_machphys_update(unsigned long mfn, unsigned long pfn)
-{
-    int cpu = smp_processor_id();
-    int idx;
-    unsigned long flags;
-    spin_lock_irqsave(&update_lock, flags);
-    idx = per_cpu(mmu_update_queue_idx, cpu);
-    per_cpu(update_queue[idx], cpu).ptr = (mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
-    per_cpu(update_queue[idx], cpu).val = pfn;
-    increment_index();
-    spin_unlock_irqrestore(&update_lock, flags);
-}
-
-/* queue and flush versions of the above */
 void xen_l1_entry_update(pte_t *ptr, unsigned long val)
 {
-    int cpu = smp_processor_id();
-    int idx;
-    unsigned long flags;
-    spin_lock_irqsave(&update_lock, flags);
-    idx = per_cpu(mmu_update_queue_idx, cpu);
-    per_cpu(update_queue[idx], cpu).ptr = virt_to_machine(ptr);
-    per_cpu(update_queue[idx], cpu).val = val;
-    increment_index_and_flush();
-    spin_unlock_irqrestore(&update_lock, flags);
+    mmu_update_t u;
+    u.ptr = virt_to_machine(ptr);
+    u.val = val;
+    BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL) < 0);
 }
 
 void xen_l2_entry_update(pmd_t *ptr, pmd_t val)
 {
-    int cpu = smp_processor_id();
-    int idx;
-    unsigned long flags;
-    spin_lock_irqsave(&update_lock, flags);
-    idx = per_cpu(mmu_update_queue_idx, cpu);
-    per_cpu(update_queue[idx], cpu).ptr = virt_to_machine(ptr);
-    per_cpu(update_queue[idx], cpu).val = pmd_val_ma(val);
-    increment_index_and_flush();
-    spin_unlock_irqrestore(&update_lock, flags);
+    mmu_update_t u;
+    u.ptr = virt_to_machine(ptr);
+    u.val = pmd_val_ma(val);
+    BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL) < 0);
 }
 
 void xen_pt_switch(unsigned long ptr)
 {
-    int cpu = smp_processor_id();
-    int idx;
-    unsigned long flags;
-    spin_lock_irqsave(&update_lock, flags);
-    idx = per_cpu(mmu_update_queue_idx, cpu);
-    per_cpu(update_queue[idx], cpu).ptr  = phys_to_machine(ptr);
-    per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
-    per_cpu(update_queue[idx], cpu).val  = MMUEXT_NEW_BASEPTR;
-    increment_index_and_flush();
-    spin_unlock_irqrestore(&update_lock, flags);
+    mmu_update_t u;
+    u.ptr = phys_to_machine(ptr) | MMU_EXTENDED_COMMAND;
+    u.val = MMUEXT_NEW_BASEPTR;
+    BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL) < 0);
 }
 
 void xen_tlb_flush(void)
 {
-    int cpu = smp_processor_id();
-    int idx;
-    unsigned long flags;
-    spin_lock_irqsave(&update_lock, flags);
-    idx = per_cpu(mmu_update_queue_idx, cpu);
-    per_cpu(update_queue[idx], cpu).ptr  = MMU_EXTENDED_COMMAND;
-    per_cpu(update_queue[idx], cpu).val  = MMUEXT_TLB_FLUSH;
-    increment_index_and_flush();
-    spin_unlock_irqrestore(&update_lock, flags);
+    mmu_update_t u;
+    u.ptr = MMU_EXTENDED_COMMAND;
+    u.val = MMUEXT_TLB_FLUSH;
+    BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL) < 0);
 }
 
 void xen_invlpg(unsigned long ptr)
 {
-    int cpu = smp_processor_id();
-    int idx;
-    unsigned long flags;
-    spin_lock_irqsave(&update_lock, flags);
-    idx = per_cpu(mmu_update_queue_idx, cpu);
-    per_cpu(update_queue[idx], cpu).ptr  = MMU_EXTENDED_COMMAND;
-    per_cpu(update_queue[idx], cpu).ptr |= ptr & PAGE_MASK;
-    per_cpu(update_queue[idx], cpu).val  = MMUEXT_INVLPG;
-    increment_index_and_flush();
-    spin_unlock_irqrestore(&update_lock, flags);
+    mmu_update_t u;
+    u.ptr = (ptr & PAGE_MASK) | MMU_EXTENDED_COMMAND;
+    u.val = MMUEXT_INVLPG;
+    BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL) < 0);
 }
 
 void xen_pgd_pin(unsigned long ptr)
 {
-    int cpu = smp_processor_id();
-    int idx;
-    unsigned long flags;
-    spin_lock_irqsave(&update_lock, flags);
-    idx = per_cpu(mmu_update_queue_idx, cpu);
-    per_cpu(update_queue[idx], cpu).ptr  = phys_to_machine(ptr);
-    per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
-    per_cpu(update_queue[idx], cpu).val  = MMUEXT_PIN_L2_TABLE;
-    increment_index_and_flush();
-    spin_unlock_irqrestore(&update_lock, flags);
+    mmu_update_t u;
+    u.ptr = phys_to_machine(ptr) | MMU_EXTENDED_COMMAND;
+    u.val = MMUEXT_PIN_L2_TABLE;
+    BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL) < 0);
 }
 
 void xen_pgd_unpin(unsigned long ptr)
 {
-    int cpu = smp_processor_id();
-    int idx;
-    unsigned long flags;
-    spin_lock_irqsave(&update_lock, flags);
-    idx = per_cpu(mmu_update_queue_idx, cpu);
-    per_cpu(update_queue[idx], cpu).ptr  = phys_to_machine(ptr);
-    per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
-    per_cpu(update_queue[idx], cpu).val  = MMUEXT_UNPIN_TABLE;
-    increment_index_and_flush();
-    spin_unlock_irqrestore(&update_lock, flags);
+    mmu_update_t u;
+    u.ptr = phys_to_machine(ptr) | MMU_EXTENDED_COMMAND;
+    u.val = MMUEXT_UNPIN_TABLE;
+    BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL) < 0);
 }
 
 void xen_pte_pin(unsigned long ptr)
 {
-    int cpu = smp_processor_id();
-    int idx;
-    unsigned long flags;
-    spin_lock_irqsave(&update_lock, flags);
-    idx = per_cpu(mmu_update_queue_idx, cpu);
-    per_cpu(update_queue[idx], cpu).ptr  = phys_to_machine(ptr);
-    per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
-    per_cpu(update_queue[idx], cpu).val  = MMUEXT_PIN_L1_TABLE;
-    increment_index_and_flush();
-    spin_unlock_irqrestore(&update_lock, flags);
+    mmu_update_t u;
+    u.ptr = phys_to_machine(ptr) | MMU_EXTENDED_COMMAND;
+    u.val = MMUEXT_PIN_L1_TABLE;
+    BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL) < 0);
 }
 
 void xen_pte_unpin(unsigned long ptr)
 {
-    int cpu = smp_processor_id();
-    int idx;
-    unsigned long flags;
-    spin_lock_irqsave(&update_lock, flags);
-    idx = per_cpu(mmu_update_queue_idx, cpu);
-    per_cpu(update_queue[idx], cpu).ptr  = phys_to_machine(ptr);
-    per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
-    per_cpu(update_queue[idx], cpu).val  = MMUEXT_UNPIN_TABLE;
-    increment_index_and_flush();
-    spin_unlock_irqrestore(&update_lock, flags);
+    mmu_update_t u;
+    u.ptr = phys_to_machine(ptr) | MMU_EXTENDED_COMMAND;
+    u.val = MMUEXT_UNPIN_TABLE;
+    BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL) < 0);
 }
 
 void xen_set_ldt(unsigned long ptr, unsigned long len)
 {
-    int cpu = smp_processor_id();
-    int idx;
-    unsigned long flags;
-    spin_lock_irqsave(&update_lock, flags);
-    idx = per_cpu(mmu_update_queue_idx, cpu);
-    per_cpu(update_queue[idx], cpu).ptr  = MMU_EXTENDED_COMMAND | ptr;
-    per_cpu(update_queue[idx], cpu).val  = MMUEXT_SET_LDT | (len << MMUEXT_CMD_SHIFT);
-    increment_index_and_flush();
-    spin_unlock_irqrestore(&update_lock, flags);
+    mmu_update_t u;
+    u.ptr = ptr | MMU_EXTENDED_COMMAND;
+    u.val = (len << MMUEXT_CMD_SHIFT) | MMUEXT_SET_LDT;
+    BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL) < 0);
 }
 
 void xen_machphys_update(unsigned long mfn, unsigned long pfn)
 {
-    int cpu = smp_processor_id();
-    int idx;
-    unsigned long flags;
-    spin_lock_irqsave(&update_lock, flags);
-    idx = per_cpu(mmu_update_queue_idx, cpu);
-    per_cpu(update_queue[idx], cpu).ptr = (mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
-    per_cpu(update_queue[idx], cpu).val = pfn;
-    increment_index_and_flush();
-    spin_unlock_irqrestore(&update_lock, flags);
+    mmu_update_t u;
+    u.ptr = (mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
+    u.val = pfn;
+    BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL) < 0);
 }
 
 #ifdef CONFIG_XEN_PHYSDEV_ACCESS
@@ -449,11 +165,10 @@ unsigned long allocate_empty_lowmem_region(unsigned long pages)
         pmd = pmd_offset(pud, (vstart + (i*PAGE_SIZE)));
         pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE))); 
         pfn_array[i] = pte->pte_low >> PAGE_SHIFT;
-        queue_l1_entry_update(pte, 0);
+        HYPERVISOR_update_va_mapping(vstart + (i*PAGE_SIZE), __pte_ma(0), 0);
         phys_to_machine_mapping[__pa(vstart)>>PAGE_SHIFT] = INVALID_P2M_ENTRY;
     }
 
-    /* Flush updates through and flush the TLB. */
     xen_tlb_flush();
 
     balloon_put_pages(pfn_array, 1 << order);
index fa9aac890ed4cbe2b8b56f5c3e8349e6c93de6c9..e22c3e1932cf9b3d573344c7b5731716b15114c5 100644 (file)
@@ -357,7 +357,6 @@ static void __init pagetable_init (void)
        make_page_readonly(new_pgd);
        xen_pgd_pin(__pa(new_pgd));
        load_cr3(new_pgd);
-       flush_page_update_queue();
        xen_pgd_unpin(__pa(old_pgd));
        make_page_writable(old_pgd);
        __flush_tlb_all();
index ff82cd2f0a32fa9affcb2c73acca15eb8bfce291..7e1f6ce5b559264197bf950d9ab48bf27383cb65 100644 (file)
@@ -139,24 +139,6 @@ static struct page *balloon_retrieve(void)
     return page;
 }
 
-static inline pte_t *get_ptep(unsigned long addr)
-{
-    pgd_t *pgd;
-    pud_t *pud;
-    pmd_t *pmd;
-
-    pgd = pgd_offset_k(addr);
-    if ( pgd_none(*pgd) || pgd_bad(*pgd) ) BUG();
-
-    pud = pud_offset(pgd, addr);
-    if ( pud_none(*pud) || pud_bad(*pud) ) BUG();
-
-    pmd = pmd_offset(pud, addr);
-    if ( pmd_none(*pmd) || pmd_bad(*pmd) ) BUG();
-
-    return pte_offset_kernel(pmd, addr);
-}
-
 static void balloon_alarm(unsigned long unused)
 {
     schedule_work(&balloon_worker);
@@ -220,14 +202,18 @@ static void balloon_process(void *unused)
 
             /* Update P->M and M->P tables. */
             phys_to_machine_mapping[pfn] = mfn_list[i];
-            queue_machphys_update(mfn_list[i], pfn);
+            xen_machphys_update(mfn_list[i], pfn);
             
             /* Link back into the page tables if it's not a highmem page. */
             if ( pfn < max_low_pfn )
-                queue_l1_entry_update(
-                    get_ptep((unsigned long)__va(pfn << PAGE_SHIFT)),
-                    (mfn_list[i] << PAGE_SHIFT) | pgprot_val(PAGE_KERNEL));
-            
+            {
+                HYPERVISOR_update_va_mapping(
+                    (unsigned long)__va(pfn << PAGE_SHIFT),
+                    __pte_ma((mfn_list[i] << PAGE_SHIFT) |
+                             pgprot_val(PAGE_KERNEL)),
+                    0);
+            }
+
             /* Finally, relinquish the memory back to the system allocator. */
             ClearPageReserved(page);
             set_page_count(page, 1);
@@ -259,7 +245,8 @@ static void balloon_process(void *unused)
             {
                 v = phys_to_virt(pfn << PAGE_SHIFT);
                 scrub_pages(v, 1);
-                queue_l1_entry_update(get_ptep((unsigned long)v), 0);
+                HYPERVISOR_update_va_mapping(
+                    (unsigned long)v, __pte_ma(0), 0);
             }
 #ifdef CONFIG_XEN_SCRUB_PAGES
             else
@@ -274,7 +261,6 @@ static void balloon_process(void *unused)
         /* Ensure that ballooned highmem pages don't have cached mappings. */
         kmap_flush_unused();
 
-        /* Flush updates through and flush the TLB. */
         xen_tlb_flush();
 
         /* No more mappings: invalidate pages in P2M and add to balloon. */
index abc3987d71b9ab8459fc2d4bdef9eea46b995c81..85f022109c2015cd120273f35e93a20726e2e0dd 100644 (file)
@@ -103,7 +103,7 @@ static inline void clear_LDT(void)
         * it slows down context switching. Noone uses it anyway.
         */
        cpu = cpu;              /* XXX avoid compiler warning */
-       queue_set_ldt(0UL, 0);
+       xen_set_ldt(0UL, 0);
        put_cpu();
 }
 
@@ -118,7 +118,7 @@ static inline void load_LDT_nolock(mm_context_t *pc, int cpu)
        if (likely(!count))
                segments = NULL;
 
-       queue_set_ldt((unsigned long)segments, count);
+       xen_set_ldt((unsigned long)segments, count);
 }
 
 static inline void load_LDT(mm_context_t *pc)
index a815fad09dfb310b9ac34bb6446db484d6bdc70a..de53523986ee8f4f8b0a28bf4fd0f99efbabdeb1 100644 (file)
@@ -68,7 +68,6 @@ static inline void switch_mm(struct mm_struct *prev,
 
 #define activate_mm(prev, next) do {           \
        switch_mm((prev),(next),NULL);          \
-       flush_page_update_queue();              \
 } while (0)
 
 #endif
index 520e98d072060885ee5681ad95299f41efd19e87..e278d7098e12f7a5b729865c4c5ad0a3ac4fe3f4 100644 (file)
@@ -9,13 +9,12 @@
 #include <asm/io.h>            /* for phys_to_virt and page_to_pseudophys */
 
 #define pmd_populate_kernel(mm, pmd, pte) \
-               set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte)))
+       set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte)))
 
 #define pmd_populate(mm, pmd, pte) do {                                \
        set_pmd(pmd, __pmd(_PAGE_TABLE +                        \
                ((unsigned long long)page_to_pfn(pte) <<        \
                        (unsigned long long) PAGE_SHIFT)));     \
-       flush_page_update_queue();                              \
 } while (0)
 /*
  * Allocate and free page tables.
@@ -30,7 +29,6 @@ static inline void pte_free_kernel(pte_t *pte)
 {
        free_page((unsigned long)pte);
        make_page_writable(pte);
-       flush_page_update_queue();
 }
 
 extern void pte_free(struct page *pte);
index dfc5b1e155ad0781a1ca439e161e1561f5e7504f..325cf468b5db275b89a92315b11efa1cc6e0c757 100644 (file)
@@ -432,7 +432,6 @@ do {                                                                        \
        }                                                               \
 } while (0)
 
-/* NOTE: make_page* callers must call flush_page_update_queue() */
 void make_lowmem_page_readonly(void *va);
 void make_lowmem_page_writable(void *va);
 void make_page_readonly(void *va);
index 4173a36fe3d73d684b36e6e96ec22581f7d20575..fd54b409e2ca9d6da07fe7fa457723a7b85c9ae0 100644 (file)
@@ -193,7 +193,7 @@ static inline unsigned int cpuid_edx(unsigned int op)
 }
 
 #define load_cr3(pgdir) do {                           \
-       queue_pt_switch(__pa(pgdir));                   \
+       xen_pt_switch(__pa(pgdir));                     \
        per_cpu(cur_pgd, smp_processor_id()) = pgdir;   \
 } while (/* CONSTCOND */0)
 
index 568e84bc2fe6167d4809dda07fe7c9bf1921b8c9..5fe90fb87f445a7850c885362923ef5357fb5fc3 100644 (file)
@@ -71,17 +71,6 @@ void lgdt_finish(void);
  * be MACHINE addresses.
  */
 
-void queue_l1_entry_update(pte_t *ptr, unsigned long val);
-void queue_l2_entry_update(pmd_t *ptr, pmd_t val);
-void queue_pt_switch(unsigned long ptr);
-void queue_tlb_flush(void);
-void queue_invlpg(unsigned long ptr);
-void queue_pgd_pin(unsigned long ptr);
-void queue_pgd_unpin(unsigned long ptr);
-void queue_pte_pin(unsigned long ptr);
-void queue_pte_unpin(unsigned long ptr);
-void queue_set_ldt(unsigned long ptr, unsigned long bytes);
-void queue_machphys_update(unsigned long mfn, unsigned long pfn);
 void xen_l1_entry_update(pte_t *ptr, unsigned long val);
 void xen_l2_entry_update(pmd_t *ptr, pmd_t val);
 void xen_pt_switch(unsigned long ptr);
@@ -94,8 +83,6 @@ void xen_pte_unpin(unsigned long ptr);
 void xen_set_ldt(unsigned long ptr, unsigned long bytes);
 void xen_machphys_update(unsigned long mfn, unsigned long pfn);
 
-void _flush_page_update_queue(void);
-
 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
 /* 
 ** XXX SMH: 2.4 doesn't have percpu.h (or support SMP guests) so just 
@@ -112,13 +99,6 @@ void _flush_page_update_queue(void);
 #define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
 #endif /* linux < 2.6.0 */
 
-#define flush_page_update_queue() do {                         \
-    DECLARE_PER_CPU(unsigned int, mmu_update_queue_idx);       \
-    if (per_cpu(mmu_update_queue_idx, smp_processor_id()))     \
-       _flush_page_update_queue();                             \
-} while (0)
-void MULTICALL_flush_page_update_queue(void);
-
 #ifdef CONFIG_XEN_PHYSDEV_ACCESS
 /* Allocate a contiguous empty region of low memory. Return virtual start. */
 unsigned long allocate_empty_lowmem_region(unsigned long pages);
diff --git a/linux-2.6.11-xen-sparse/include/asm-xen/multicall.h b/linux-2.6.11-xen-sparse/include/asm-xen/multicall.h
deleted file mode 100644 (file)
index 1f03675..0000000
+++ /dev/null
@@ -1,115 +0,0 @@
-/******************************************************************************
- * multicall.h
- * 
- * Copyright (c) 2003-2004, K A Fraser
- * 
- * This file may be distributed separately from the Linux kernel, or
- * incorporated into other software packages, subject to the following license:
- * 
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this source file (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy, modify,
- * merge, publish, distribute, sublicense, and/or sell copies of the Software,
- * and to permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- * 
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- * 
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#ifndef __MULTICALL_H__
-#define __MULTICALL_H__
-
-#include <asm-xen/hypervisor.h>
-
-DECLARE_PER_CPU(multicall_entry_t, multicall_list[]);
-DECLARE_PER_CPU(int, nr_multicall_ents);
-
-static inline void queue_multicall0(unsigned long op)
-{
-    int cpu = smp_processor_id();
-    int i = per_cpu(nr_multicall_ents, cpu);
-    per_cpu(multicall_list[i], cpu).op      = op;
-    per_cpu(nr_multicall_ents, cpu) = i+1;
-}
-
-static inline void queue_multicall1(unsigned long op, unsigned long arg1)
-{
-    int cpu = smp_processor_id();
-    int i = per_cpu(nr_multicall_ents, cpu);
-    per_cpu(multicall_list[i], cpu).op      = op;
-    per_cpu(multicall_list[i], cpu).args[0] = arg1;
-    per_cpu(nr_multicall_ents, cpu) = i+1;
-}
-
-static inline void queue_multicall2(
-    unsigned long op, unsigned long arg1, unsigned long arg2)
-{
-    int cpu = smp_processor_id();
-    int i = per_cpu(nr_multicall_ents, cpu);
-    per_cpu(multicall_list[i], cpu).op      = op;
-    per_cpu(multicall_list[i], cpu).args[0] = arg1;
-    per_cpu(multicall_list[i], cpu).args[1] = arg2;
-    per_cpu(nr_multicall_ents, cpu) = i+1;
-}
-
-static inline void queue_multicall3(
-    unsigned long op, unsigned long arg1, unsigned long arg2,
-    unsigned long arg3)
-{
-    int cpu = smp_processor_id();
-    int i = per_cpu(nr_multicall_ents, cpu);
-    per_cpu(multicall_list[i], cpu).op      = op;
-    per_cpu(multicall_list[i], cpu).args[0] = arg1;
-    per_cpu(multicall_list[i], cpu).args[1] = arg2;
-    per_cpu(multicall_list[i], cpu).args[2] = arg3;
-    per_cpu(nr_multicall_ents, cpu) = i+1;
-}
-
-static inline void queue_multicall4(
-    unsigned long op, unsigned long arg1, unsigned long arg2,
-    unsigned long arg3, unsigned long arg4)
-{
-    int cpu = smp_processor_id();
-    int i = per_cpu(nr_multicall_ents, cpu);
-    per_cpu(multicall_list[i], cpu).op      = op;
-    per_cpu(multicall_list[i], cpu).args[0] = arg1;
-    per_cpu(multicall_list[i], cpu).args[1] = arg2;
-    per_cpu(multicall_list[i], cpu).args[2] = arg3;
-    per_cpu(multicall_list[i], cpu).args[3] = arg4;
-    per_cpu(nr_multicall_ents, cpu) = i+1;
-}
-
-static inline void queue_multicall5(
-    unsigned long op, unsigned long arg1, unsigned long arg2,
-    unsigned long arg3, unsigned long arg4, unsigned long arg5)
-{
-    int cpu = smp_processor_id();
-    int i = per_cpu(nr_multicall_ents, cpu);
-    per_cpu(multicall_list[i], cpu).op      = op;
-    per_cpu(multicall_list[i], cpu).args[0] = arg1;
-    per_cpu(multicall_list[i], cpu).args[1] = arg2;
-    per_cpu(multicall_list[i], cpu).args[2] = arg3;
-    per_cpu(multicall_list[i], cpu).args[3] = arg4;
-    per_cpu(multicall_list[i], cpu).args[4] = arg5;
-    per_cpu(nr_multicall_ents, cpu) = i+1;
-}
-
-static inline void execute_multicall_list(void)
-{
-    int cpu = smp_processor_id();
-    if ( unlikely(per_cpu(nr_multicall_ents, cpu) == 0) ) return;
-    (void)HYPERVISOR_multicall(&per_cpu(multicall_list[0], cpu),
-                              per_cpu(nr_multicall_ents, cpu));
-    per_cpu(nr_multicall_ents, cpu) = 0;
-}
-
-#endif /* __MULTICALL_H__ */